bitkeeper revision 1.1236.1.187 (424d5d8cR-dhzBJoJYstmA_JbmRm5g)
author: kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
Fri, 1 Apr 2005 14:41:16 +0000 (14:41 +0000)
committer: kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
Fri, 1 Apr 2005 14:41:16 +0000 (14:41 +0000)
Batch cr3 and ldt switches into a single mmuext_op hypercall. This will
also avoid one unnecessary TLB flush per context switch.
Signed-off-by: Keir Fraser <keir@xensource.com>
linux-2.4.29-xen-sparse/include/asm-xen/mmu_context.h
linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/mmu_context.h

index 2aea8e2f1a0c2710e9984374b5b8b9aa75452467..74004c8d46766a3d77e13bc2cdd6625f0aece5fd 100644 (file)
@@ -31,17 +31,25 @@ extern pgd_t *cur_pgd;
 
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk, unsigned cpu)
 {
+       struct mmuext_op _op[2], *op = _op;
        if (prev != next) {
                /* stop flush ipis for the previous mm */
                clear_bit(cpu, &prev->cpu_vm_mask);
                /* Re-load page tables */
                cur_pgd = next->pgd;
-               xen_pt_switch(__pa(cur_pgd));
+               op->cmd = MMUEXT_NEW_BASEPTR;
+               op->mfn = pfn_to_mfn(__pa(next->pgd) >> PAGE_SHIFT);
+               op++;
                /* load_LDT, if either the previous or next thread
                 * has a non-default LDT.
                 */
-               if (next->context.size+prev->context.size)
-                       load_LDT(&next->context);
+               if (next->context.size+prev->context.size) {
+                       op->cmd = MMUEXT_SET_LDT;
+                       op->linear_addr = (unsigned long)next->context.ldt;
+                       op->nr_ents     = next->context.size;
+                       op++;
+               }
+               BUG_ON(HYPERVISOR_mmuext_op(_op, op-_op, NULL, DOMID_SELF));
        }
 }
 
index 4263f25ac5b7d50f9ce7bb45a86442507a92bc02..2150a8dcf9f95cd2831174e0b186de8822801708 100644 (file)
@@ -46,6 +46,7 @@ static inline void switch_mm(struct mm_struct *prev,
                             struct task_struct *tsk)
 {
        int cpu = smp_processor_id();
+       struct mmuext_op _op[2], *op = _op;
 
        if (likely(prev != next)) {
                /* stop flush ipis for the previous mm */
@@ -56,14 +57,24 @@ static inline void switch_mm(struct mm_struct *prev,
 #endif
                cpu_set(cpu, next->cpu_vm_mask);
 
-               /* Re-load page tables */
-               load_cr3(next->pgd);
+               /* Re-load page tables: load_cr3(next->pgd) */
+               per_cpu(cur_pgd, cpu) = next->pgd;
+               op->cmd = MMUEXT_NEW_BASEPTR;
+               op->mfn = pfn_to_mfn(__pa(next->pgd) >> PAGE_SHIFT);
+               op++;
 
                /*
                 * load the LDT, if the LDT is different:
                 */
-               if (unlikely(prev->context.ldt != next->context.ldt))
-                       load_LDT_nolock(&next->context, cpu);
+               if (unlikely(prev->context.ldt != next->context.ldt)) {
+                       /* load_LDT_nolock(&next->context, cpu) */
+                       op->cmd = MMUEXT_SET_LDT;
+                       op->linear_addr = (unsigned long)next->context.ldt;
+                       op->nr_ents     = next->context.size;
+                       op++;
+               }
+
+               BUG_ON(HYPERVISOR_mmuext_op(_op, op-_op, NULL, DOMID_SELF));
        }
 #if 0 /* XEN */
        else {